crackerjack 0.33.0__py3-none-any.whl → 0.33.2__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of crackerjack might be problematic.
- crackerjack/__main__.py +1350 -34
- crackerjack/adapters/__init__.py +17 -0
- crackerjack/adapters/lsp_client.py +358 -0
- crackerjack/adapters/rust_tool_adapter.py +194 -0
- crackerjack/adapters/rust_tool_manager.py +193 -0
- crackerjack/adapters/skylos_adapter.py +231 -0
- crackerjack/adapters/zuban_adapter.py +560 -0
- crackerjack/agents/base.py +7 -3
- crackerjack/agents/coordinator.py +271 -33
- crackerjack/agents/documentation_agent.py +9 -15
- crackerjack/agents/dry_agent.py +3 -15
- crackerjack/agents/formatting_agent.py +1 -1
- crackerjack/agents/import_optimization_agent.py +36 -180
- crackerjack/agents/performance_agent.py +17 -98
- crackerjack/agents/performance_helpers.py +7 -31
- crackerjack/agents/proactive_agent.py +1 -3
- crackerjack/agents/refactoring_agent.py +16 -85
- crackerjack/agents/refactoring_helpers.py +7 -42
- crackerjack/agents/security_agent.py +9 -48
- crackerjack/agents/test_creation_agent.py +356 -513
- crackerjack/agents/test_specialist_agent.py +0 -4
- crackerjack/api.py +6 -25
- crackerjack/cli/cache_handlers.py +204 -0
- crackerjack/cli/cache_handlers_enhanced.py +683 -0
- crackerjack/cli/facade.py +100 -0
- crackerjack/cli/handlers.py +224 -9
- crackerjack/cli/interactive.py +6 -4
- crackerjack/cli/options.py +642 -55
- crackerjack/cli/utils.py +2 -1
- crackerjack/code_cleaner.py +58 -117
- crackerjack/config/global_lock_config.py +8 -48
- crackerjack/config/hooks.py +53 -62
- crackerjack/core/async_workflow_orchestrator.py +24 -34
- crackerjack/core/autofix_coordinator.py +3 -17
- crackerjack/core/enhanced_container.py +4 -13
- crackerjack/core/file_lifecycle.py +12 -89
- crackerjack/core/performance.py +2 -2
- crackerjack/core/performance_monitor.py +15 -55
- crackerjack/core/phase_coordinator.py +104 -204
- crackerjack/core/resource_manager.py +14 -90
- crackerjack/core/service_watchdog.py +62 -95
- crackerjack/core/session_coordinator.py +149 -0
- crackerjack/core/timeout_manager.py +14 -72
- crackerjack/core/websocket_lifecycle.py +13 -78
- crackerjack/core/workflow_orchestrator.py +171 -174
- crackerjack/docs/INDEX.md +11 -0
- crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
- crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
- crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
- crackerjack/docs/generated/api/SERVICES.md +1252 -0
- crackerjack/documentation/__init__.py +31 -0
- crackerjack/documentation/ai_templates.py +756 -0
- crackerjack/documentation/dual_output_generator.py +765 -0
- crackerjack/documentation/mkdocs_integration.py +518 -0
- crackerjack/documentation/reference_generator.py +977 -0
- crackerjack/dynamic_config.py +55 -50
- crackerjack/executors/async_hook_executor.py +10 -15
- crackerjack/executors/cached_hook_executor.py +117 -43
- crackerjack/executors/hook_executor.py +8 -34
- crackerjack/executors/hook_lock_manager.py +26 -183
- crackerjack/executors/individual_hook_executor.py +13 -11
- crackerjack/executors/lsp_aware_hook_executor.py +270 -0
- crackerjack/executors/tool_proxy.py +417 -0
- crackerjack/hooks/lsp_hook.py +79 -0
- crackerjack/intelligence/adaptive_learning.py +25 -10
- crackerjack/intelligence/agent_orchestrator.py +2 -5
- crackerjack/intelligence/agent_registry.py +34 -24
- crackerjack/intelligence/agent_selector.py +5 -7
- crackerjack/interactive.py +17 -6
- crackerjack/managers/async_hook_manager.py +0 -1
- crackerjack/managers/hook_manager.py +79 -1
- crackerjack/managers/publish_manager.py +44 -8
- crackerjack/managers/test_command_builder.py +1 -15
- crackerjack/managers/test_executor.py +1 -3
- crackerjack/managers/test_manager.py +98 -7
- crackerjack/managers/test_manager_backup.py +10 -9
- crackerjack/mcp/cache.py +2 -2
- crackerjack/mcp/client_runner.py +1 -1
- crackerjack/mcp/context.py +191 -68
- crackerjack/mcp/dashboard.py +7 -5
- crackerjack/mcp/enhanced_progress_monitor.py +31 -28
- crackerjack/mcp/file_monitor.py +30 -23
- crackerjack/mcp/progress_components.py +31 -21
- crackerjack/mcp/progress_monitor.py +50 -53
- crackerjack/mcp/rate_limiter.py +6 -6
- crackerjack/mcp/server_core.py +17 -16
- crackerjack/mcp/service_watchdog.py +2 -1
- crackerjack/mcp/state.py +4 -7
- crackerjack/mcp/task_manager.py +11 -9
- crackerjack/mcp/tools/core_tools.py +173 -32
- crackerjack/mcp/tools/error_analyzer.py +3 -2
- crackerjack/mcp/tools/execution_tools.py +8 -10
- crackerjack/mcp/tools/execution_tools_backup.py +42 -30
- crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
- crackerjack/mcp/tools/intelligence_tools.py +5 -2
- crackerjack/mcp/tools/monitoring_tools.py +33 -70
- crackerjack/mcp/tools/proactive_tools.py +24 -11
- crackerjack/mcp/tools/progress_tools.py +5 -8
- crackerjack/mcp/tools/utility_tools.py +20 -14
- crackerjack/mcp/tools/workflow_executor.py +62 -40
- crackerjack/mcp/websocket/app.py +8 -0
- crackerjack/mcp/websocket/endpoints.py +352 -357
- crackerjack/mcp/websocket/jobs.py +40 -57
- crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
- crackerjack/mcp/websocket/server.py +7 -25
- crackerjack/mcp/websocket/websocket_handler.py +6 -17
- crackerjack/mixins/__init__.py +0 -2
- crackerjack/mixins/error_handling.py +1 -70
- crackerjack/models/config.py +12 -1
- crackerjack/models/config_adapter.py +49 -1
- crackerjack/models/protocols.py +122 -122
- crackerjack/models/resource_protocols.py +55 -210
- crackerjack/monitoring/ai_agent_watchdog.py +13 -13
- crackerjack/monitoring/metrics_collector.py +426 -0
- crackerjack/monitoring/regression_prevention.py +8 -8
- crackerjack/monitoring/websocket_server.py +643 -0
- crackerjack/orchestration/advanced_orchestrator.py +11 -6
- crackerjack/orchestration/coverage_improvement.py +3 -3
- crackerjack/orchestration/execution_strategies.py +26 -6
- crackerjack/orchestration/test_progress_streamer.py +8 -5
- crackerjack/plugins/base.py +2 -2
- crackerjack/plugins/hooks.py +7 -0
- crackerjack/plugins/managers.py +11 -8
- crackerjack/security/__init__.py +0 -1
- crackerjack/security/audit.py +6 -35
- crackerjack/services/anomaly_detector.py +392 -0
- crackerjack/services/api_extractor.py +615 -0
- crackerjack/services/backup_service.py +2 -2
- crackerjack/services/bounded_status_operations.py +15 -152
- crackerjack/services/cache.py +127 -1
- crackerjack/services/changelog_automation.py +395 -0
- crackerjack/services/config.py +15 -9
- crackerjack/services/config_merge.py +19 -80
- crackerjack/services/config_template.py +506 -0
- crackerjack/services/contextual_ai_assistant.py +48 -22
- crackerjack/services/coverage_badge_service.py +171 -0
- crackerjack/services/coverage_ratchet.py +27 -25
- crackerjack/services/debug.py +3 -3
- crackerjack/services/dependency_analyzer.py +460 -0
- crackerjack/services/dependency_monitor.py +14 -11
- crackerjack/services/documentation_generator.py +491 -0
- crackerjack/services/documentation_service.py +675 -0
- crackerjack/services/enhanced_filesystem.py +6 -5
- crackerjack/services/enterprise_optimizer.py +865 -0
- crackerjack/services/error_pattern_analyzer.py +676 -0
- crackerjack/services/file_hasher.py +1 -1
- crackerjack/services/git.py +8 -25
- crackerjack/services/health_metrics.py +10 -8
- crackerjack/services/heatmap_generator.py +735 -0
- crackerjack/services/initialization.py +11 -30
- crackerjack/services/input_validator.py +5 -97
- crackerjack/services/intelligent_commit.py +327 -0
- crackerjack/services/log_manager.py +15 -12
- crackerjack/services/logging.py +4 -3
- crackerjack/services/lsp_client.py +628 -0
- crackerjack/services/memory_optimizer.py +19 -87
- crackerjack/services/metrics.py +42 -33
- crackerjack/services/parallel_executor.py +9 -67
- crackerjack/services/pattern_cache.py +1 -1
- crackerjack/services/pattern_detector.py +6 -6
- crackerjack/services/performance_benchmarks.py +18 -59
- crackerjack/services/performance_cache.py +20 -81
- crackerjack/services/performance_monitor.py +27 -95
- crackerjack/services/predictive_analytics.py +510 -0
- crackerjack/services/quality_baseline.py +234 -0
- crackerjack/services/quality_baseline_enhanced.py +646 -0
- crackerjack/services/quality_intelligence.py +785 -0
- crackerjack/services/regex_patterns.py +618 -524
- crackerjack/services/regex_utils.py +43 -123
- crackerjack/services/secure_path_utils.py +5 -164
- crackerjack/services/secure_status_formatter.py +30 -141
- crackerjack/services/secure_subprocess.py +11 -92
- crackerjack/services/security.py +9 -41
- crackerjack/services/security_logger.py +12 -24
- crackerjack/services/server_manager.py +124 -16
- crackerjack/services/status_authentication.py +16 -159
- crackerjack/services/status_security_manager.py +4 -131
- crackerjack/services/thread_safe_status_collector.py +19 -125
- crackerjack/services/unified_config.py +21 -13
- crackerjack/services/validation_rate_limiter.py +5 -54
- crackerjack/services/version_analyzer.py +459 -0
- crackerjack/services/version_checker.py +1 -1
- crackerjack/services/websocket_resource_limiter.py +10 -144
- crackerjack/services/zuban_lsp_service.py +390 -0
- crackerjack/slash_commands/__init__.py +2 -7
- crackerjack/slash_commands/run.md +2 -2
- crackerjack/tools/validate_input_validator_patterns.py +14 -40
- crackerjack/tools/validate_regex_patterns.py +19 -48
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/METADATA +196 -25
- crackerjack-0.33.2.dist-info/RECORD +229 -0
- crackerjack/CLAUDE.md +0 -207
- crackerjack/RULES.md +0 -380
- crackerjack/py313.py +0 -234
- crackerjack-0.33.0.dist-info/RECORD +0 -187
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/WHEEL +0 -0
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/licenses/LICENSE +0 -0
crackerjack/__main__.py
CHANGED
|
@@ -1,6 +1,14 @@
|
|
|
1
|
+
import typing as t
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
1
4
|
import typer
|
|
2
5
|
from rich.console import Console
|
|
3
6
|
|
|
7
|
+
if t.TYPE_CHECKING:
|
|
8
|
+
from crackerjack.services.changelog_automation import ChangelogGenerator
|
|
9
|
+
|
|
10
|
+
from crackerjack.services.git import GitService
|
|
11
|
+
|
|
4
12
|
from .cli import (
|
|
5
13
|
CLI_OPTIONS,
|
|
6
14
|
BumpOption,
|
|
@@ -9,16 +17,21 @@ from .cli import (
|
|
|
9
17
|
handle_standard_mode,
|
|
10
18
|
setup_ai_agent_env,
|
|
11
19
|
)
|
|
20
|
+
from .cli.cache_handlers import _handle_cache_commands
|
|
12
21
|
from .cli.handlers import (
|
|
22
|
+
handle_config_updates,
|
|
13
23
|
handle_dashboard_mode,
|
|
14
24
|
handle_enhanced_monitor_mode,
|
|
15
25
|
handle_mcp_server,
|
|
16
26
|
handle_monitor_mode,
|
|
17
27
|
handle_restart_mcp_server,
|
|
18
28
|
handle_restart_websocket_server,
|
|
29
|
+
handle_restart_zuban_lsp,
|
|
19
30
|
handle_start_websocket_server,
|
|
31
|
+
handle_start_zuban_lsp,
|
|
20
32
|
handle_stop_mcp_server,
|
|
21
33
|
handle_stop_websocket_server,
|
|
34
|
+
handle_stop_zuban_lsp,
|
|
22
35
|
handle_watchdog_mode,
|
|
23
36
|
)
|
|
24
37
|
|
|
@@ -32,6 +45,8 @@ def _handle_monitoring_commands(
|
|
|
32
45
|
monitor: bool,
|
|
33
46
|
enhanced_monitor: bool,
|
|
34
47
|
dashboard: bool,
|
|
48
|
+
unified_dashboard: bool,
|
|
49
|
+
unified_dashboard_port: int | None,
|
|
35
50
|
watchdog: bool,
|
|
36
51
|
dev: bool,
|
|
37
52
|
) -> bool:
|
|
@@ -44,6 +59,12 @@ def _handle_monitoring_commands(
|
|
|
44
59
|
if dashboard:
|
|
45
60
|
handle_dashboard_mode(dev_mode=dev)
|
|
46
61
|
return True
|
|
62
|
+
if unified_dashboard:
|
|
63
|
+
from .cli.handlers import handle_unified_dashboard_mode
|
|
64
|
+
|
|
65
|
+
port = unified_dashboard_port or 8675
|
|
66
|
+
handle_unified_dashboard_mode(port=port, dev_mode=dev)
|
|
67
|
+
return True
|
|
47
68
|
if watchdog:
|
|
48
69
|
handle_watchdog_mode()
|
|
49
70
|
return True
|
|
@@ -88,10 +109,31 @@ def _handle_mcp_commands(
|
|
|
88
109
|
return False
|
|
89
110
|
|
|
90
111
|
|
|
112
|
+
def _handle_zuban_lsp_commands(
|
|
113
|
+
start_zuban_lsp: bool,
|
|
114
|
+
stop_zuban_lsp: bool,
|
|
115
|
+
restart_zuban_lsp: bool,
|
|
116
|
+
zuban_lsp_port: int,
|
|
117
|
+
zuban_lsp_mode: str,
|
|
118
|
+
) -> bool:
|
|
119
|
+
if start_zuban_lsp:
|
|
120
|
+
handle_start_zuban_lsp(port=zuban_lsp_port, mode=zuban_lsp_mode)
|
|
121
|
+
return True
|
|
122
|
+
if stop_zuban_lsp:
|
|
123
|
+
handle_stop_zuban_lsp()
|
|
124
|
+
return True
|
|
125
|
+
if restart_zuban_lsp:
|
|
126
|
+
handle_restart_zuban_lsp(port=zuban_lsp_port, mode=zuban_lsp_mode)
|
|
127
|
+
return True
|
|
128
|
+
return False
|
|
129
|
+
|
|
130
|
+
|
|
91
131
|
def _handle_server_commands(
|
|
92
132
|
monitor: bool,
|
|
93
133
|
enhanced_monitor: bool,
|
|
94
134
|
dashboard: bool,
|
|
135
|
+
unified_dashboard: bool,
|
|
136
|
+
unified_dashboard_port: int | None,
|
|
95
137
|
watchdog: bool,
|
|
96
138
|
start_websocket_server: bool,
|
|
97
139
|
stop_websocket_server: bool,
|
|
@@ -100,10 +142,23 @@ def _handle_server_commands(
|
|
|
100
142
|
stop_mcp_server: bool,
|
|
101
143
|
restart_mcp_server: bool,
|
|
102
144
|
websocket_port: int | None,
|
|
145
|
+
start_zuban_lsp: bool,
|
|
146
|
+
stop_zuban_lsp: bool,
|
|
147
|
+
restart_zuban_lsp: bool,
|
|
148
|
+
zuban_lsp_port: int,
|
|
149
|
+
zuban_lsp_mode: str,
|
|
103
150
|
dev: bool,
|
|
104
151
|
) -> bool:
|
|
105
152
|
return (
|
|
106
|
-
_handle_monitoring_commands(
|
|
153
|
+
_handle_monitoring_commands(
|
|
154
|
+
monitor,
|
|
155
|
+
enhanced_monitor,
|
|
156
|
+
dashboard,
|
|
157
|
+
unified_dashboard,
|
|
158
|
+
unified_dashboard_port,
|
|
159
|
+
watchdog,
|
|
160
|
+
dev,
|
|
161
|
+
)
|
|
107
162
|
or _handle_websocket_commands(
|
|
108
163
|
start_websocket_server,
|
|
109
164
|
stop_websocket_server,
|
|
@@ -116,9 +171,1047 @@ def _handle_server_commands(
|
|
|
116
171
|
restart_mcp_server,
|
|
117
172
|
websocket_port,
|
|
118
173
|
)
|
|
174
|
+
or _handle_zuban_lsp_commands(
|
|
175
|
+
start_zuban_lsp,
|
|
176
|
+
stop_zuban_lsp,
|
|
177
|
+
restart_zuban_lsp,
|
|
178
|
+
zuban_lsp_port,
|
|
179
|
+
zuban_lsp_mode,
|
|
180
|
+
)
|
|
181
|
+
)
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def _generate_documentation(doc_service: t.Any, console: t.Any) -> bool:
|
|
185
|
+
"""Generate API documentation.
|
|
186
|
+
|
|
187
|
+
Returns True if successful, False if failed.
|
|
188
|
+
"""
|
|
189
|
+
console.print("📖 [bold blue]Generating API documentation...[/bold blue]")
|
|
190
|
+
success = doc_service.generate_full_api_documentation()
|
|
191
|
+
if success:
|
|
192
|
+
console.print(
|
|
193
|
+
"✅ [bold green]Documentation generated successfully![/bold green]"
|
|
194
|
+
)
|
|
195
|
+
return True
|
|
196
|
+
else:
|
|
197
|
+
console.print("❌ [bold red]Documentation generation failed![/bold red]")
|
|
198
|
+
return False
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
def _validate_documentation_files(doc_service: t.Any, console: t.Any) -> None:
|
|
202
|
+
"""Validate existing documentation files."""
|
|
203
|
+
from pathlib import Path
|
|
204
|
+
|
|
205
|
+
console.print("🔍 [bold blue]Validating documentation...[/bold blue]")
|
|
206
|
+
doc_paths = [Path("docs"), Path("README.md"), Path("CHANGELOG.md")]
|
|
207
|
+
existing_docs = [p for p in doc_paths if p.exists()]
|
|
208
|
+
|
|
209
|
+
if existing_docs:
|
|
210
|
+
issues = doc_service.validate_documentation(existing_docs)
|
|
211
|
+
if issues:
|
|
212
|
+
console.print(f"⚠️ Found {len(issues)} documentation issues:")
|
|
213
|
+
for issue in issues:
|
|
214
|
+
file_path = issue.get("path", issue.get("file", "unknown"))
|
|
215
|
+
console.print(f" - {file_path}: {issue['message']}")
|
|
216
|
+
else:
|
|
217
|
+
console.print(
|
|
218
|
+
"✅ [bold green]Documentation validation passed![/bold green]"
|
|
219
|
+
)
|
|
220
|
+
else:
|
|
221
|
+
console.print("⚠️ No documentation files found to validate.")
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def _handle_documentation_commands(
|
|
225
|
+
generate_docs: bool, validate_docs: bool, console: Console, options: t.Any
|
|
226
|
+
) -> bool:
|
|
227
|
+
"""Handle documentation generation and validation commands.
|
|
228
|
+
|
|
229
|
+
Returns True if documentation commands were handled and execution should continue,
|
|
230
|
+
False if execution should return early.
|
|
231
|
+
"""
|
|
232
|
+
if not (generate_docs or validate_docs):
|
|
233
|
+
return True
|
|
234
|
+
|
|
235
|
+
from pathlib import Path
|
|
236
|
+
|
|
237
|
+
from crackerjack.services.documentation_service import DocumentationServiceImpl
|
|
238
|
+
|
|
239
|
+
pkg_path = Path("crackerjack")
|
|
240
|
+
doc_service = DocumentationServiceImpl(pkg_path=pkg_path, console=console)
|
|
241
|
+
|
|
242
|
+
if generate_docs:
|
|
243
|
+
if not _generate_documentation(doc_service, console):
|
|
244
|
+
return False
|
|
245
|
+
|
|
246
|
+
if validate_docs:
|
|
247
|
+
_validate_documentation_files(doc_service, console)
|
|
248
|
+
|
|
249
|
+
# Check if we should continue with other operations
|
|
250
|
+
return any(
|
|
251
|
+
[
|
|
252
|
+
options.run_tests,
|
|
253
|
+
options.strip_code,
|
|
254
|
+
options.all,
|
|
255
|
+
options.publish,
|
|
256
|
+
options.comp,
|
|
257
|
+
]
|
|
258
|
+
)
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
def _handle_changelog_commands(
|
|
262
|
+
generate_changelog: bool,
|
|
263
|
+
changelog_dry_run: bool,
|
|
264
|
+
changelog_version: str | None,
|
|
265
|
+
changelog_since: str | None,
|
|
266
|
+
console: Console,
|
|
267
|
+
options: t.Any,
|
|
268
|
+
) -> bool:
|
|
269
|
+
"""Handle changelog generation commands.
|
|
270
|
+
|
|
271
|
+
Returns True if changelog commands were handled and execution should continue,
|
|
272
|
+
False if execution should return early.
|
|
273
|
+
"""
|
|
274
|
+
if not (generate_changelog or changelog_dry_run):
|
|
275
|
+
return True
|
|
276
|
+
|
|
277
|
+
services = _setup_changelog_services(console)
|
|
278
|
+
changelog_path = services["pkg_path"] / "CHANGELOG.md"
|
|
279
|
+
|
|
280
|
+
if changelog_dry_run:
|
|
281
|
+
return _handle_changelog_dry_run(
|
|
282
|
+
services["generator"], changelog_since, console, options
|
|
283
|
+
)
|
|
284
|
+
|
|
285
|
+
if generate_changelog:
|
|
286
|
+
return _handle_changelog_generation(
|
|
287
|
+
services,
|
|
288
|
+
changelog_path,
|
|
289
|
+
changelog_version,
|
|
290
|
+
changelog_since,
|
|
291
|
+
console,
|
|
292
|
+
options,
|
|
293
|
+
)
|
|
294
|
+
|
|
295
|
+
return _should_continue_after_changelog(options)
|
|
296
|
+
|
|
297
|
+
|
|
298
|
+
def _setup_changelog_services(console: Console) -> dict[str, t.Any]:
|
|
299
|
+
"""Setup changelog services and dependencies."""
|
|
300
|
+
from pathlib import Path
|
|
301
|
+
|
|
302
|
+
from crackerjack.services.changelog_automation import ChangelogGenerator
|
|
303
|
+
from crackerjack.services.git import GitService
|
|
304
|
+
|
|
305
|
+
pkg_path = Path()
|
|
306
|
+
git_service = GitService(console, pkg_path)
|
|
307
|
+
changelog_generator = ChangelogGenerator(console, git_service)
|
|
308
|
+
|
|
309
|
+
return {
|
|
310
|
+
"pkg_path": pkg_path,
|
|
311
|
+
"git_service": git_service,
|
|
312
|
+
"generator": changelog_generator,
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
|
|
316
|
+
def _handle_changelog_dry_run(
|
|
317
|
+
generator: "ChangelogGenerator",
|
|
318
|
+
changelog_since: str | None,
|
|
319
|
+
console: Console,
|
|
320
|
+
options: t.Any,
|
|
321
|
+
) -> bool:
|
|
322
|
+
"""Handle changelog dry run preview."""
|
|
323
|
+
console.print("🔍 [bold blue]Previewing changelog generation...[/bold blue]")
|
|
324
|
+
entries = generator.generate_changelog_entries(changelog_since)
|
|
325
|
+
if entries:
|
|
326
|
+
generator._display_changelog_preview(entries)
|
|
327
|
+
console.print("✅ [bold green]Changelog preview completed![/bold green]")
|
|
328
|
+
else:
|
|
329
|
+
console.print("⚠️ No new changelog entries to generate.")
|
|
330
|
+
|
|
331
|
+
return _should_continue_after_changelog(options)
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
def _handle_changelog_generation(
|
|
335
|
+
services: dict[str, t.Any],
|
|
336
|
+
changelog_path: "Path",
|
|
337
|
+
changelog_version: str | None,
|
|
338
|
+
changelog_since: str | None,
|
|
339
|
+
console: Console,
|
|
340
|
+
options: t.Any,
|
|
341
|
+
) -> bool:
|
|
342
|
+
"""Handle actual changelog generation."""
|
|
343
|
+
console.print("📝 [bold blue]Generating changelog...[/bold blue]")
|
|
344
|
+
|
|
345
|
+
version = _determine_changelog_version(
|
|
346
|
+
services["git_service"], changelog_version, changelog_since, console, options
|
|
347
|
+
)
|
|
348
|
+
|
|
349
|
+
success = services["generator"].generate_changelog_from_commits(
|
|
350
|
+
changelog_path=changelog_path,
|
|
351
|
+
version=version,
|
|
352
|
+
since_version=changelog_since,
|
|
353
|
+
)
|
|
354
|
+
|
|
355
|
+
if success:
|
|
356
|
+
console.print(
|
|
357
|
+
f"✅ [bold green]Changelog updated for version {version}![/bold green]"
|
|
358
|
+
)
|
|
359
|
+
return _should_continue_after_changelog(options)
|
|
360
|
+
else:
|
|
361
|
+
console.print("❌ [bold red]Changelog generation failed![/bold red]")
|
|
362
|
+
return False
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
def _determine_changelog_version(
|
|
366
|
+
git_service: GitService,
|
|
367
|
+
changelog_version: str | None,
|
|
368
|
+
changelog_since: str | None,
|
|
369
|
+
console: Console,
|
|
370
|
+
options: t.Any,
|
|
371
|
+
) -> str:
|
|
372
|
+
"""Determine the version to use for changelog generation."""
|
|
373
|
+
if getattr(options, "auto_version", False) and not changelog_version:
|
|
374
|
+
try:
|
|
375
|
+
import asyncio
|
|
376
|
+
|
|
377
|
+
from crackerjack.services.version_analyzer import VersionAnalyzer
|
|
378
|
+
|
|
379
|
+
version_analyzer = VersionAnalyzer(console, git_service)
|
|
380
|
+
console.print(
|
|
381
|
+
"[cyan]🔍[/cyan] Analyzing version changes for intelligent changelog..."
|
|
382
|
+
)
|
|
383
|
+
|
|
384
|
+
recommendation = asyncio.run(
|
|
385
|
+
version_analyzer.recommend_version_bump(changelog_since)
|
|
386
|
+
)
|
|
387
|
+
version = recommendation.recommended_version
|
|
388
|
+
console.print(f"[green]✨[/green] Using AI-recommended version: {version}")
|
|
389
|
+
return version
|
|
390
|
+
except Exception as e:
|
|
391
|
+
console.print(f"[yellow]⚠️[/yellow] Version analysis failed: {e}")
|
|
392
|
+
return changelog_version or "Unreleased"
|
|
393
|
+
|
|
394
|
+
return changelog_version or "Unreleased"
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
def _should_continue_after_changelog(options: t.Any) -> bool:
|
|
398
|
+
"""Check if execution should continue after changelog operations."""
|
|
399
|
+
return any(
|
|
400
|
+
[
|
|
401
|
+
options.run_tests,
|
|
402
|
+
options.strip_code,
|
|
403
|
+
options.all,
|
|
404
|
+
options.publish,
|
|
405
|
+
options.comp,
|
|
406
|
+
]
|
|
407
|
+
)
|
|
408
|
+
|
|
409
|
+
|
|
410
|
+
def _handle_version_analysis(
|
|
411
|
+
auto_version: bool,
|
|
412
|
+
version_since: str | None,
|
|
413
|
+
accept_version: bool,
|
|
414
|
+
console: Console,
|
|
415
|
+
options: t.Any,
|
|
416
|
+
) -> bool:
|
|
417
|
+
"""Handle automatic version analysis and recommendations.
|
|
418
|
+
|
|
419
|
+
Returns True if version analysis was handled and execution should continue,
|
|
420
|
+
False if execution should return early.
|
|
421
|
+
"""
|
|
422
|
+
if not auto_version:
|
|
423
|
+
return True
|
|
424
|
+
|
|
425
|
+
from pathlib import Path
|
|
426
|
+
|
|
427
|
+
from rich.prompt import Confirm
|
|
428
|
+
|
|
429
|
+
from crackerjack.services.git import GitService
|
|
430
|
+
from crackerjack.services.version_analyzer import VersionAnalyzer
|
|
431
|
+
|
|
432
|
+
pkg_path = Path()
|
|
433
|
+
git_service = GitService(console, pkg_path)
|
|
434
|
+
version_analyzer = VersionAnalyzer(console, git_service)
|
|
435
|
+
|
|
436
|
+
try:
|
|
437
|
+
import asyncio
|
|
438
|
+
|
|
439
|
+
recommendation = asyncio.run(
|
|
440
|
+
version_analyzer.recommend_version_bump(version_since)
|
|
441
|
+
)
|
|
442
|
+
version_analyzer.display_recommendation(recommendation)
|
|
443
|
+
|
|
444
|
+
if accept_version or Confirm.ask(
|
|
445
|
+
f"\nAccept recommendation ({recommendation.bump_type.value})",
|
|
446
|
+
default=True,
|
|
447
|
+
):
|
|
448
|
+
console.print(
|
|
449
|
+
f"[green]✅ Version bump accepted: {recommendation.current_version} → {recommendation.recommended_version}[/green]"
|
|
450
|
+
)
|
|
451
|
+
# Note: Actual version bumping would integrate with existing publish/bump logic
|
|
452
|
+
else:
|
|
453
|
+
console.print("[yellow]❌ Version bump declined[/yellow]")
|
|
454
|
+
|
|
455
|
+
except Exception as e:
|
|
456
|
+
console.print(f"[red]❌ Version analysis failed: {e}[/red]")
|
|
457
|
+
|
|
458
|
+
# Check if we should continue with other operations
|
|
459
|
+
return any(
|
|
460
|
+
[
|
|
461
|
+
options.run_tests,
|
|
462
|
+
options.strip_code,
|
|
463
|
+
options.all,
|
|
464
|
+
options.publish,
|
|
465
|
+
options.comp,
|
|
466
|
+
]
|
|
119
467
|
)
|
|
120
468
|
|
|
121
469
|
|
|
470
|
+
def _setup_debug_and_verbose_flags(
|
|
471
|
+
ai_debug: bool, debug: bool, verbose: bool, options: t.Any
|
|
472
|
+
) -> tuple[bool, bool]:
|
|
473
|
+
"""Configure debug and verbose flags and update options.
|
|
474
|
+
|
|
475
|
+
Returns tuple of (ai_fix, verbose) flags.
|
|
476
|
+
"""
|
|
477
|
+
ai_fix = False
|
|
478
|
+
|
|
479
|
+
if ai_debug:
|
|
480
|
+
ai_fix = True
|
|
481
|
+
verbose = True
|
|
482
|
+
options.verbose = True
|
|
483
|
+
|
|
484
|
+
if debug:
|
|
485
|
+
verbose = True
|
|
486
|
+
options.verbose = True
|
|
487
|
+
|
|
488
|
+
return ai_fix, verbose
|
|
489
|
+
|
|
490
|
+
|
|
491
|
+
def _handle_heatmap_generation(
|
|
492
|
+
heatmap: bool,
|
|
493
|
+
heatmap_type: str,
|
|
494
|
+
heatmap_output: str | None,
|
|
495
|
+
console: Console,
|
|
496
|
+
) -> bool:
|
|
497
|
+
"""Handle heat map generation and visualization.
|
|
498
|
+
|
|
499
|
+
Returns True if execution should continue, False if should return early.
|
|
500
|
+
"""
|
|
501
|
+
if not heatmap:
|
|
502
|
+
return True
|
|
503
|
+
|
|
504
|
+
from pathlib import Path
|
|
505
|
+
|
|
506
|
+
from crackerjack.services.heatmap_generator import HeatMapGenerator
|
|
507
|
+
|
|
508
|
+
console.print("[cyan]🔥[/cyan] Generating heat map visualization...")
|
|
509
|
+
|
|
510
|
+
try:
|
|
511
|
+
generator = HeatMapGenerator()
|
|
512
|
+
project_root = Path.cwd()
|
|
513
|
+
|
|
514
|
+
# Generate the requested heat map type
|
|
515
|
+
if heatmap_type == "error_frequency":
|
|
516
|
+
heatmap_data = generator.generate_error_frequency_heatmap()
|
|
517
|
+
elif heatmap_type == "complexity":
|
|
518
|
+
heatmap_data = generator.generate_code_complexity_heatmap(project_root)
|
|
519
|
+
elif heatmap_type == "quality_metrics":
|
|
520
|
+
heatmap_data = generator.generate_quality_metrics_heatmap()
|
|
521
|
+
elif heatmap_type == "test_failures":
|
|
522
|
+
heatmap_data = generator.generate_test_failure_heatmap()
|
|
523
|
+
else:
|
|
524
|
+
console.print(f"[red]❌[/red] Unknown heat map type: {heatmap_type}")
|
|
525
|
+
return False
|
|
526
|
+
|
|
527
|
+
# Determine output format and save
|
|
528
|
+
if heatmap_output:
|
|
529
|
+
output_path = Path(heatmap_output)
|
|
530
|
+
if output_path.suffix.lower() == ".html":
|
|
531
|
+
# Generate HTML visualization
|
|
532
|
+
html_content = generator.generate_html_visualization(heatmap_data)
|
|
533
|
+
output_path.write_text(html_content, encoding="utf-8")
|
|
534
|
+
console.print(
|
|
535
|
+
f"[green]✅[/green] Heat map HTML saved to: {output_path}"
|
|
536
|
+
)
|
|
537
|
+
elif output_path.suffix.lower() in (".json", ".csv"):
|
|
538
|
+
# Export data in requested format
|
|
539
|
+
format_type = output_path.suffix[1:] # Remove the dot
|
|
540
|
+
generator.export_heatmap_data(heatmap_data, output_path, format_type)
|
|
541
|
+
console.print(
|
|
542
|
+
f"[green]✅[/green] Heat map data saved to: {output_path}"
|
|
543
|
+
)
|
|
544
|
+
else:
|
|
545
|
+
console.print(
|
|
546
|
+
f"[red]❌[/red] Unsupported output format: {output_path.suffix}"
|
|
547
|
+
)
|
|
548
|
+
return False
|
|
549
|
+
else:
|
|
550
|
+
# Default: save as HTML in current directory
|
|
551
|
+
default_filename = f"heatmap_{heatmap_type}.html"
|
|
552
|
+
html_content = generator.generate_html_visualization(heatmap_data)
|
|
553
|
+
Path(default_filename).write_text(html_content, encoding="utf-8")
|
|
554
|
+
console.print(
|
|
555
|
+
f"[green]✅[/green] Heat map HTML saved to: {default_filename}"
|
|
556
|
+
)
|
|
557
|
+
|
|
558
|
+
# Display summary
|
|
559
|
+
console.print(
|
|
560
|
+
f"[cyan]📊[/cyan] Heat map '{heatmap_data.title}' generated successfully"
|
|
561
|
+
)
|
|
562
|
+
console.print(f"[dim] • Cells: {len(heatmap_data.cells)}")
|
|
563
|
+
console.print(f"[dim] • X Labels: {len(heatmap_data.x_labels)}")
|
|
564
|
+
console.print(f"[dim] • Y Labels: {len(heatmap_data.y_labels)}")
|
|
565
|
+
|
|
566
|
+
return False # Exit after generating heat map
|
|
567
|
+
|
|
568
|
+
except Exception as e:
|
|
569
|
+
console.print(f"[red]❌[/red] Heat map generation failed: {e}")
|
|
570
|
+
return False
|
|
571
|
+
|
|
572
|
+
|
|
573
|
+
def _generate_anomaly_sample_data(detector: t.Any, console: Console) -> None:
|
|
574
|
+
"""Generate sample anomaly detection data for demonstration."""
|
|
575
|
+
from datetime import datetime, timedelta
|
|
576
|
+
|
|
577
|
+
base_time = datetime.now() - timedelta(hours=24)
|
|
578
|
+
metric_types = [
|
|
579
|
+
"test_pass_rate",
|
|
580
|
+
"coverage_percentage",
|
|
581
|
+
"complexity_score",
|
|
582
|
+
"execution_time",
|
|
583
|
+
"error_count",
|
|
584
|
+
]
|
|
585
|
+
|
|
586
|
+
console.print("[dim] • Collecting quality metrics from recent runs...")
|
|
587
|
+
|
|
588
|
+
# Add historical data points to establish baselines
|
|
589
|
+
for i in range(50):
|
|
590
|
+
timestamp = base_time + timedelta(minutes=i * 30)
|
|
591
|
+
for metric_type in metric_types:
|
|
592
|
+
value = _get_sample_metric_value(metric_type)
|
|
593
|
+
detector.add_metric(metric_type, value, timestamp)
|
|
594
|
+
|
|
595
|
+
|
|
596
|
+
def _get_sample_metric_value(metric_type: str) -> float:
|
|
597
|
+
"""Generate sample metric value with occasional anomalies."""
|
|
598
|
+
import random
|
|
599
|
+
|
|
600
|
+
is_anomaly = random.random() <= 0.1
|
|
601
|
+
|
|
602
|
+
if metric_type == "test_pass_rate":
|
|
603
|
+
return random.uniform(0.3, 0.7) if is_anomaly else random.uniform(0.85, 0.98)
|
|
604
|
+
elif metric_type == "coverage_percentage":
|
|
605
|
+
return random.uniform(40, 60) if is_anomaly else random.uniform(75, 95)
|
|
606
|
+
elif metric_type == "complexity_score":
|
|
607
|
+
return random.uniform(20, 35) if is_anomaly else random.uniform(8, 15)
|
|
608
|
+
elif metric_type == "execution_time":
|
|
609
|
+
return random.uniform(300, 600) if is_anomaly else random.uniform(30, 120)
|
|
610
|
+
# error_count
|
|
611
|
+
return random.uniform(8, 15) if is_anomaly else random.uniform(0, 3)
|
|
612
|
+
|
|
613
|
+
|
|
614
|
+
def _display_anomaly_results(
|
|
615
|
+
anomalies: list[t.Any], baselines: dict[str, t.Any], console: Console
|
|
616
|
+
) -> None:
|
|
617
|
+
"""Display anomaly detection analysis results."""
|
|
618
|
+
console.print("[cyan]📊[/cyan] Analysis complete:")
|
|
619
|
+
console.print(f"[dim] • Baselines established for {len(baselines)} metrics")
|
|
620
|
+
console.print(f"[dim] • {len(anomalies)} anomalies detected")
|
|
621
|
+
|
|
622
|
+
if anomalies:
|
|
623
|
+
console.print("\n[yellow]⚠️[/yellow] Detected anomalies:")
|
|
624
|
+
for anomaly in anomalies[:5]: # Show top 5 anomalies
|
|
625
|
+
severity_color = {
|
|
626
|
+
"low": "yellow",
|
|
627
|
+
"medium": "orange",
|
|
628
|
+
"high": "red",
|
|
629
|
+
"critical": "bright_red",
|
|
630
|
+
}.get(anomaly.severity, "white")
|
|
631
|
+
|
|
632
|
+
console.print(
|
|
633
|
+
f" • [{severity_color}]{anomaly.severity.upper()}[/{severity_color}] "
|
|
634
|
+
f"{anomaly.metric_type}: {anomaly.description}"
|
|
635
|
+
)
|
|
636
|
+
|
|
637
|
+
|
|
638
|
+
def _save_anomaly_report(
|
|
639
|
+
anomalies: list[t.Any],
|
|
640
|
+
baselines: dict[str, t.Any],
|
|
641
|
+
anomaly_sensitivity: float,
|
|
642
|
+
anomaly_report: str,
|
|
643
|
+
console: Console,
|
|
644
|
+
) -> None:
|
|
645
|
+
"""Save anomaly detection report to file."""
|
|
646
|
+
import json
|
|
647
|
+
from datetime import datetime
|
|
648
|
+
from pathlib import Path
|
|
649
|
+
|
|
650
|
+
report_data = {
|
|
651
|
+
"timestamp": datetime.now().isoformat(),
|
|
652
|
+
"summary": {
|
|
653
|
+
"total_anomalies": len(anomalies),
|
|
654
|
+
"baselines_count": len(baselines),
|
|
655
|
+
"sensitivity": anomaly_sensitivity,
|
|
656
|
+
},
|
|
657
|
+
"anomalies": [
|
|
658
|
+
{
|
|
659
|
+
"timestamp": a.timestamp.isoformat(),
|
|
660
|
+
"metric_type": a.metric_type,
|
|
661
|
+
"value": a.value,
|
|
662
|
+
"expected_range": a.expected_range,
|
|
663
|
+
"severity": a.severity,
|
|
664
|
+
"confidence": a.confidence,
|
|
665
|
+
"description": a.description,
|
|
666
|
+
}
|
|
667
|
+
for a in anomalies
|
|
668
|
+
],
|
|
669
|
+
"baselines": baselines,
|
|
670
|
+
}
|
|
671
|
+
|
|
672
|
+
report_path = Path(anomaly_report)
|
|
673
|
+
report_path.write_text(json.dumps(report_data, indent=2), encoding="utf-8")
|
|
674
|
+
console.print(f"[green]✅[/green] Anomaly detection report saved to: {report_path}")
|
|
675
|
+
|
|
676
|
+
|
|
677
|
+
def _handle_anomaly_detection(
|
|
678
|
+
anomaly_detection: bool,
|
|
679
|
+
anomaly_sensitivity: float,
|
|
680
|
+
anomaly_report: str | None,
|
|
681
|
+
console: Console,
|
|
682
|
+
) -> bool:
|
|
683
|
+
"""Handle ML-based anomaly detection for quality metrics.
|
|
684
|
+
|
|
685
|
+
Returns True if execution should continue, False if should return early.
|
|
686
|
+
"""
|
|
687
|
+
if not anomaly_detection:
|
|
688
|
+
return True
|
|
689
|
+
|
|
690
|
+
from crackerjack.services.anomaly_detector import AnomalyDetector
|
|
691
|
+
|
|
692
|
+
console.print("[cyan]🔍[/cyan] Running ML-based anomaly detection...")
|
|
693
|
+
|
|
694
|
+
try:
|
|
695
|
+
detector = AnomalyDetector(sensitivity=anomaly_sensitivity)
|
|
696
|
+
|
|
697
|
+
# Generate sample data for demonstration
|
|
698
|
+
_generate_anomaly_sample_data(detector, console)
|
|
699
|
+
|
|
700
|
+
# Generate analysis results
|
|
701
|
+
anomalies = detector.get_anomalies()
|
|
702
|
+
baselines = detector.get_baseline_summary()
|
|
703
|
+
|
|
704
|
+
# Display results
|
|
705
|
+
_display_anomaly_results(anomalies, baselines, console)
|
|
706
|
+
|
|
707
|
+
# Save report if requested
|
|
708
|
+
if anomaly_report:
|
|
709
|
+
_save_anomaly_report(
|
|
710
|
+
anomalies, baselines, anomaly_sensitivity, anomaly_report, console
|
|
711
|
+
)
|
|
712
|
+
|
|
713
|
+
return False # Exit after anomaly detection
|
|
714
|
+
|
|
715
|
+
except Exception as e:
|
|
716
|
+
console.print(f"[red]❌[/red] Anomaly detection failed: {e}")
|
|
717
|
+
return False
|
|
718
|
+
|
|
719
|
+
|
|
720
|
+
def _generate_predictive_sample_data(engine: t.Any) -> list[str]:
|
|
721
|
+
"""Generate sample historical data for predictive analytics."""
|
|
722
|
+
import random
|
|
723
|
+
from datetime import datetime, timedelta
|
|
724
|
+
|
|
725
|
+
base_time = datetime.now() - timedelta(hours=72) # 3 days of history
|
|
726
|
+
metric_types = [
|
|
727
|
+
"test_pass_rate",
|
|
728
|
+
"coverage_percentage",
|
|
729
|
+
"execution_time",
|
|
730
|
+
"memory_usage",
|
|
731
|
+
"complexity_score",
|
|
732
|
+
]
|
|
733
|
+
|
|
734
|
+
base_values = {
|
|
735
|
+
"test_pass_rate": 0.95,
|
|
736
|
+
"coverage_percentage": 0.85,
|
|
737
|
+
"execution_time": 120.0,
|
|
738
|
+
"memory_usage": 512.0,
|
|
739
|
+
"complexity_score": 10.0,
|
|
740
|
+
}
|
|
741
|
+
|
|
742
|
+
# Generate sample historical data
|
|
743
|
+
for metric_type in metric_types:
|
|
744
|
+
base_value = base_values[metric_type]
|
|
745
|
+
for i in range(48): # 48 hours of data points
|
|
746
|
+
timestamp = base_time + timedelta(hours=i)
|
|
747
|
+
# Add some trend and random variation
|
|
748
|
+
trend_factor = 1.0 + (i * 0.001) # Slight upward trend
|
|
749
|
+
noise = random.uniform(0.9, 1.1) # 10% noise
|
|
750
|
+
value = base_value * trend_factor * noise
|
|
751
|
+
engine.add_metric(metric_type, value, timestamp)
|
|
752
|
+
|
|
753
|
+
return metric_types
|
|
754
|
+
|
|
755
|
+
|
|
756
|
+
def _generate_predictions_summary(
|
|
757
|
+
engine: t.Any, metric_types: list[str], prediction_periods: int
|
|
758
|
+
) -> dict[str, t.Any]:
|
|
759
|
+
"""Generate predictions summary for all metric types."""
|
|
760
|
+
predictions_summary = {}
|
|
761
|
+
trend_summary = engine.get_trend_summary()
|
|
762
|
+
|
|
763
|
+
for metric_type in metric_types:
|
|
764
|
+
predictions = engine.predict_metric(metric_type, prediction_periods)
|
|
765
|
+
if predictions:
|
|
766
|
+
predictions_summary[metric_type] = {
|
|
767
|
+
"trend": trend_summary.get(metric_type, {}),
|
|
768
|
+
"predictions": [
|
|
769
|
+
{
|
|
770
|
+
"predicted_for": p.predicted_for.isoformat(),
|
|
771
|
+
"predicted_value": round(p.predicted_value, 3),
|
|
772
|
+
"confidence_interval": [
|
|
773
|
+
round(p.confidence_interval[0], 3),
|
|
774
|
+
round(p.confidence_interval[1], 3),
|
|
775
|
+
],
|
|
776
|
+
"model_accuracy": round(p.model_accuracy, 3),
|
|
777
|
+
}
|
|
778
|
+
for p in predictions[:5] # Show first 5 predictions
|
|
779
|
+
],
|
|
780
|
+
}
|
|
781
|
+
|
|
782
|
+
return predictions_summary
|
|
783
|
+
|
|
784
|
+
|
|
785
|
+
def _display_trend_analysis(
|
|
786
|
+
predictions_summary: dict[str, t.Any], console: Console
|
|
787
|
+
) -> None:
|
|
788
|
+
"""Display trend analysis summary."""
|
|
789
|
+
console.print("\n[green]📈[/green] Trend Analysis Summary:")
|
|
790
|
+
|
|
791
|
+
for metric_type, data in predictions_summary.items():
|
|
792
|
+
trend_info = data.get("trend", {})
|
|
793
|
+
direction = trend_info.get("trend_direction", "unknown")
|
|
794
|
+
strength = trend_info.get("trend_strength", 0)
|
|
795
|
+
|
|
796
|
+
direction_color = {
|
|
797
|
+
"increasing": "green",
|
|
798
|
+
"decreasing": "red",
|
|
799
|
+
"stable": "blue",
|
|
800
|
+
"volatile": "yellow",
|
|
801
|
+
}.get(direction, "white")
|
|
802
|
+
|
|
803
|
+
console.print(
|
|
804
|
+
f" • {metric_type}: [{direction_color}]{direction}[/{direction_color}] "
|
|
805
|
+
f"(strength: {strength:.2f})"
|
|
806
|
+
)
|
|
807
|
+
|
|
808
|
+
if data["predictions"]:
|
|
809
|
+
next_pred = data["predictions"][0]
|
|
810
|
+
console.print(
|
|
811
|
+
f" Next prediction: {next_pred['predicted_value']} "
|
|
812
|
+
f"(confidence: {next_pred['model_accuracy']:.2f})"
|
|
813
|
+
)
|
|
814
|
+
|
|
815
|
+
|
|
816
|
+
def _save_analytics_dashboard(
|
|
817
|
+
predictions_summary: dict[str, t.Any],
|
|
818
|
+
trend_summary: dict[str, t.Any],
|
|
819
|
+
metric_types: list[str],
|
|
820
|
+
prediction_periods: int,
|
|
821
|
+
analytics_dashboard: str,
|
|
822
|
+
console: Console,
|
|
823
|
+
) -> None:
|
|
824
|
+
"""Save analytics dashboard data to file."""
|
|
825
|
+
import json
|
|
826
|
+
from datetime import datetime
|
|
827
|
+
from pathlib import Path
|
|
828
|
+
|
|
829
|
+
dashboard_data = {
|
|
830
|
+
"timestamp": datetime.now().isoformat(),
|
|
831
|
+
"summary": {
|
|
832
|
+
"prediction_periods": prediction_periods,
|
|
833
|
+
"metrics_analyzed": len(metric_types),
|
|
834
|
+
"total_predictions": sum(
|
|
835
|
+
len(data["predictions"]) for data in predictions_summary.values()
|
|
836
|
+
),
|
|
837
|
+
},
|
|
838
|
+
"trends": trend_summary,
|
|
839
|
+
"predictions": predictions_summary,
|
|
840
|
+
}
|
|
841
|
+
|
|
842
|
+
dashboard_path = Path(analytics_dashboard)
|
|
843
|
+
dashboard_path.write_text(json.dumps(dashboard_data, indent=2), encoding="utf-8")
|
|
844
|
+
console.print(f"[green]✅[/green] Analytics dashboard saved to: {dashboard_path}")
|
|
845
|
+
|
|
846
|
+
|
|
847
|
+
def _handle_predictive_analytics(
|
|
848
|
+
predictive_analytics: bool,
|
|
849
|
+
prediction_periods: int,
|
|
850
|
+
analytics_dashboard: str | None,
|
|
851
|
+
console: Console,
|
|
852
|
+
) -> bool:
|
|
853
|
+
"""Handle predictive analytics and trend forecasting.
|
|
854
|
+
|
|
855
|
+
Returns True if execution should continue, False if should return early.
|
|
856
|
+
"""
|
|
857
|
+
if not predictive_analytics:
|
|
858
|
+
return True
|
|
859
|
+
|
|
860
|
+
from crackerjack.services.predictive_analytics import PredictiveAnalyticsEngine
|
|
861
|
+
|
|
862
|
+
console.print(
|
|
863
|
+
"[cyan]📊[/cyan] Running predictive analytics and trend forecasting..."
|
|
864
|
+
)
|
|
865
|
+
|
|
866
|
+
try:
|
|
867
|
+
engine = PredictiveAnalyticsEngine()
|
|
868
|
+
|
|
869
|
+
# Generate sample historical data
|
|
870
|
+
metric_types = _generate_predictive_sample_data(engine)
|
|
871
|
+
|
|
872
|
+
# Generate predictions
|
|
873
|
+
console.print(
|
|
874
|
+
f"[blue]🔮[/blue] Generating {prediction_periods} period predictions..."
|
|
875
|
+
)
|
|
876
|
+
|
|
877
|
+
predictions_summary = _generate_predictions_summary(
|
|
878
|
+
engine, metric_types, prediction_periods
|
|
879
|
+
)
|
|
880
|
+
trend_summary = engine.get_trend_summary()
|
|
881
|
+
|
|
882
|
+
# Display analysis results
|
|
883
|
+
_display_trend_analysis(predictions_summary, console)
|
|
884
|
+
|
|
885
|
+
# Save dashboard if requested
|
|
886
|
+
if analytics_dashboard:
|
|
887
|
+
_save_analytics_dashboard(
|
|
888
|
+
predictions_summary,
|
|
889
|
+
trend_summary,
|
|
890
|
+
metric_types,
|
|
891
|
+
prediction_periods,
|
|
892
|
+
analytics_dashboard,
|
|
893
|
+
console,
|
|
894
|
+
)
|
|
895
|
+
|
|
896
|
+
return False # Exit after predictive analytics
|
|
897
|
+
|
|
898
|
+
except Exception as e:
|
|
899
|
+
console.print(f"[red]❌[/red] Predictive analytics failed: {e}")
|
|
900
|
+
return False
|
|
901
|
+
|
|
902
|
+
|
|
903
|
+
def _handle_enterprise_optimizer(
|
|
904
|
+
enterprise_optimizer: bool,
|
|
905
|
+
enterprise_profile: str | None,
|
|
906
|
+
enterprise_report: str | None,
|
|
907
|
+
console: Console,
|
|
908
|
+
) -> bool:
|
|
909
|
+
"""Handle enterprise-scale optimization engine.
|
|
910
|
+
|
|
911
|
+
Returns True if execution should continue, False if should return early.
|
|
912
|
+
"""
|
|
913
|
+
if not enterprise_optimizer:
|
|
914
|
+
return True
|
|
915
|
+
|
|
916
|
+
console.print("[cyan]🏢[/cyan] Running enterprise-scale optimization analysis...")
|
|
917
|
+
|
|
918
|
+
try:
|
|
919
|
+
optimizer = _setup_enterprise_optimizer(enterprise_profile)
|
|
920
|
+
result = _run_enterprise_optimization(optimizer, console)
|
|
921
|
+
_display_enterprise_results(result, enterprise_report, console)
|
|
922
|
+
return False # Exit after enterprise optimization
|
|
923
|
+
|
|
924
|
+
except Exception as e:
|
|
925
|
+
console.print(f"[red]❌[/red] Enterprise optimizer error: {e}")
|
|
926
|
+
return False
|
|
927
|
+
|
|
928
|
+
|
|
929
|
+
def _setup_enterprise_optimizer(enterprise_profile: str | None) -> t.Any:
|
|
930
|
+
"""Setup enterprise optimizer with directories and profile."""
|
|
931
|
+
import tempfile
|
|
932
|
+
from pathlib import Path
|
|
933
|
+
|
|
934
|
+
from crackerjack.services.enterprise_optimizer import EnterpriseOptimizer
|
|
935
|
+
|
|
936
|
+
config_dir = Path.cwd() / ".crackerjack"
|
|
937
|
+
storage_dir = Path(tempfile.gettempdir()) / "crackerjack_storage"
|
|
938
|
+
optimizer = EnterpriseOptimizer(config_dir, storage_dir)
|
|
939
|
+
|
|
940
|
+
if enterprise_profile:
|
|
941
|
+
optimizer.performance_profile.optimization_strategy = enterprise_profile
|
|
942
|
+
|
|
943
|
+
return optimizer
|
|
944
|
+
|
|
945
|
+
|
|
946
|
+
def _run_enterprise_optimization(optimizer: t.Any, console: t.Any) -> t.Any:
|
|
947
|
+
"""Run the optimization cycle and return results."""
|
|
948
|
+
import asyncio
|
|
949
|
+
|
|
950
|
+
console.print("[blue]📊[/blue] Analyzing system resources and performance...")
|
|
951
|
+
return asyncio.run(optimizer.run_optimization_cycle())
|
|
952
|
+
|
|
953
|
+
|
|
954
|
+
def _display_enterprise_results(
|
|
955
|
+
result: t.Any, enterprise_report: str | None, console: t.Any
|
|
956
|
+
) -> None:
|
|
957
|
+
"""Display optimization results and save report if requested."""
|
|
958
|
+
if result["status"] == "success":
|
|
959
|
+
console.print(
|
|
960
|
+
"[green]✅[/green] Enterprise optimization completed successfully"
|
|
961
|
+
)
|
|
962
|
+
_display_enterprise_metrics(result["metrics"], console)
|
|
963
|
+
_display_enterprise_recommendations(result["recommendations"], console)
|
|
964
|
+
_save_enterprise_report(result, enterprise_report, console)
|
|
965
|
+
else:
|
|
966
|
+
console.print(
|
|
967
|
+
f"[red]❌[/red] Enterprise optimization failed: {result.get('message', 'Unknown error')}"
|
|
968
|
+
)
|
|
969
|
+
|
|
970
|
+
|
|
971
|
+
def _display_enterprise_metrics(metrics: t.Any, console: t.Any) -> None:
|
|
972
|
+
"""Display key system metrics."""
|
|
973
|
+
console.print(f"[blue]CPU Usage:[/blue] {metrics['cpu_percent']:.1f}%")
|
|
974
|
+
console.print(f"[blue]Memory Usage:[/blue] {metrics['memory_percent']:.1f}%")
|
|
975
|
+
console.print(f"[blue]Storage Usage:[/blue] {metrics['disk_usage_percent']:.1f}%")
|
|
976
|
+
|
|
977
|
+
|
|
978
|
+
def _display_enterprise_recommendations(recommendations: t.Any, console: t.Any) -> None:
|
|
979
|
+
"""Display optimization recommendations."""
|
|
980
|
+
if recommendations:
|
|
981
|
+
console.print(
|
|
982
|
+
f"\n[yellow]💡[/yellow] Found {len(recommendations)} optimization recommendations:"
|
|
983
|
+
)
|
|
984
|
+
for rec in recommendations[:3]: # Show top 3
|
|
985
|
+
priority_color = {"high": "red", "medium": "yellow", "low": "blue"}[
|
|
986
|
+
rec["priority"]
|
|
987
|
+
]
|
|
988
|
+
console.print(
|
|
989
|
+
f" [{priority_color}]{rec['priority'].upper()}[/{priority_color}]: {rec['title']}"
|
|
990
|
+
)
|
|
991
|
+
|
|
992
|
+
|
|
993
|
+
def _save_enterprise_report(
|
|
994
|
+
result: t.Any, enterprise_report: str | None, console: t.Any
|
|
995
|
+
) -> None:
|
|
996
|
+
"""Save enterprise report to file if requested."""
|
|
997
|
+
if enterprise_report:
|
|
998
|
+
import json
|
|
999
|
+
|
|
1000
|
+
with open(enterprise_report, "w") as f:
|
|
1001
|
+
json.dump(result, f, indent=2)
|
|
1002
|
+
console.print(
|
|
1003
|
+
f"[green]📄[/green] Enterprise report saved to: {enterprise_report}"
|
|
1004
|
+
)
|
|
1005
|
+
|
|
1006
|
+
|
|
1007
|
+
def _handle_mkdocs_integration(
|
|
1008
|
+
mkdocs_integration: bool,
|
|
1009
|
+
mkdocs_serve: bool,
|
|
1010
|
+
mkdocs_theme: str,
|
|
1011
|
+
mkdocs_output: str | None,
|
|
1012
|
+
console: Console,
|
|
1013
|
+
) -> bool:
|
|
1014
|
+
"""Handle MkDocs documentation site generation.
|
|
1015
|
+
|
|
1016
|
+
Returns True if execution should continue, False if should return early.
|
|
1017
|
+
"""
|
|
1018
|
+
if not mkdocs_integration:
|
|
1019
|
+
return True
|
|
1020
|
+
|
|
1021
|
+
console.print("[cyan]📚[/cyan] Generating MkDocs documentation site...")
|
|
1022
|
+
|
|
1023
|
+
try:
|
|
1024
|
+
services = _create_mkdocs_services()
|
|
1025
|
+
builder = services["builder"]
|
|
1026
|
+
output_dir = _determine_mkdocs_output_dir(mkdocs_output)
|
|
1027
|
+
docs_content = _create_sample_docs_content()
|
|
1028
|
+
|
|
1029
|
+
console.print(
|
|
1030
|
+
f"[blue]🏗️[/blue] Building documentation site with {mkdocs_theme} theme..."
|
|
1031
|
+
)
|
|
1032
|
+
|
|
1033
|
+
_build_mkdocs_site(builder, docs_content, output_dir, mkdocs_serve)
|
|
1034
|
+
site = None # _build_mkdocs_site returns None
|
|
1035
|
+
_handle_mkdocs_build_result(site, mkdocs_serve, console)
|
|
1036
|
+
|
|
1037
|
+
return False # Exit after MkDocs generation
|
|
1038
|
+
|
|
1039
|
+
except Exception as e:
|
|
1040
|
+
console.print(f"[red]❌[/red] MkDocs integration error: {e}")
|
|
1041
|
+
return False
|
|
1042
|
+
|
|
1043
|
+
|
|
1044
|
+
def _create_mkdocs_services() -> dict[str, t.Any]:
|
|
1045
|
+
"""Create and configure MkDocs services."""
|
|
1046
|
+
from logging import getLogger
|
|
1047
|
+
from pathlib import Path
|
|
1048
|
+
|
|
1049
|
+
from crackerjack.documentation.mkdocs_integration import (
|
|
1050
|
+
MkDocsIntegrationService,
|
|
1051
|
+
MkDocsSiteBuilder,
|
|
1052
|
+
)
|
|
1053
|
+
|
|
1054
|
+
# Create filesystem service that matches FileSystemServiceProtocol
|
|
1055
|
+
class SyncFileSystemService:
|
|
1056
|
+
def read_file(self, path: str | Path) -> str:
|
|
1057
|
+
return Path(path).read_text()
|
|
1058
|
+
|
|
1059
|
+
def write_file(self, path: str | Path, content: str) -> None:
|
|
1060
|
+
Path(path).write_text(content)
|
|
1061
|
+
|
|
1062
|
+
def exists(self, path: str | Path) -> bool:
|
|
1063
|
+
return Path(path).exists()
|
|
1064
|
+
|
|
1065
|
+
def mkdir(self, path: str | Path, parents: bool = False) -> None:
|
|
1066
|
+
Path(path).mkdir(parents=parents, exist_ok=True)
|
|
1067
|
+
|
|
1068
|
+
def ensure_directory(self, path: str | Path) -> None:
|
|
1069
|
+
Path(path).mkdir(parents=True, exist_ok=True)
|
|
1070
|
+
|
|
1071
|
+
# Create config manager that implements ConfigManagerProtocol
|
|
1072
|
+
class ConfigManager:
|
|
1073
|
+
def __init__(self) -> None:
|
|
1074
|
+
self._config: dict[str, t.Any] = {}
|
|
1075
|
+
|
|
1076
|
+
def get(self, key: str, default: t.Any = None) -> t.Any:
|
|
1077
|
+
return self._config.get(key, default)
|
|
1078
|
+
|
|
1079
|
+
def set(self, key: str, value: t.Any) -> None:
|
|
1080
|
+
self._config[key] = value
|
|
1081
|
+
|
|
1082
|
+
def save(self) -> bool:
|
|
1083
|
+
return True
|
|
1084
|
+
|
|
1085
|
+
def load(self) -> bool:
|
|
1086
|
+
return True
|
|
1087
|
+
|
|
1088
|
+
filesystem = SyncFileSystemService()
|
|
1089
|
+
config_manager = ConfigManager()
|
|
1090
|
+
logger = getLogger(__name__)
|
|
1091
|
+
|
|
1092
|
+
integration_service = MkDocsIntegrationService(config_manager, filesystem, logger)
|
|
1093
|
+
builder = MkDocsSiteBuilder(integration_service)
|
|
1094
|
+
|
|
1095
|
+
return {"builder": builder, "filesystem": filesystem, "config": config_manager}
|
|
1096
|
+
|
|
1097
|
+
|
|
1098
|
+
def _determine_mkdocs_output_dir(mkdocs_output: str | None) -> "Path":
|
|
1099
|
+
"""Determine the output directory for MkDocs site."""
|
|
1100
|
+
from pathlib import Path
|
|
1101
|
+
|
|
1102
|
+
return Path(mkdocs_output) if mkdocs_output else Path.cwd() / "docs_site"
|
|
1103
|
+
|
|
1104
|
+
|
|
1105
|
+
def _create_sample_docs_content() -> dict[str, str]:
|
|
1106
|
+
"""Create sample documentation content."""
|
|
1107
|
+
return {
|
|
1108
|
+
"index.md": "# Project Documentation\n\nWelcome to the project documentation.",
|
|
1109
|
+
"getting-started.md": "# Getting Started\n\nQuick start guide for the project.",
|
|
1110
|
+
"api-reference.md": "# API Reference\n\nAPI documentation and examples.",
|
|
1111
|
+
}
|
|
1112
|
+
|
|
1113
|
+
|
|
1114
|
+
def _build_mkdocs_site(
|
|
1115
|
+
builder: t.Any, docs_content: dict[str, str], output_dir: Path, serve: bool
|
|
1116
|
+
) -> None:
|
|
1117
|
+
"""Build the MkDocs documentation site."""
|
|
1118
|
+
import asyncio
|
|
1119
|
+
|
|
1120
|
+
asyncio.run(
|
|
1121
|
+
builder.build_documentation_site(
|
|
1122
|
+
project_name="Project Documentation",
|
|
1123
|
+
project_description="Comprehensive project documentation",
|
|
1124
|
+
author="Crackerjack",
|
|
1125
|
+
documentation_content=docs_content,
|
|
1126
|
+
output_dir=output_dir,
|
|
1127
|
+
serve=serve,
|
|
1128
|
+
)
|
|
1129
|
+
)
|
|
1130
|
+
|
|
1131
|
+
|
|
1132
|
+
def _handle_mkdocs_build_result(
|
|
1133
|
+
site: t.Any, mkdocs_serve: bool, console: Console
|
|
1134
|
+
) -> None:
|
|
1135
|
+
"""Handle the result of MkDocs site building."""
|
|
1136
|
+
if site:
|
|
1137
|
+
console.print(
|
|
1138
|
+
f"[green]✅[/green] MkDocs site generated successfully at: {site.build_path}"
|
|
1139
|
+
)
|
|
1140
|
+
console.print(
|
|
1141
|
+
f"[blue]📄[/blue] Generated {len(site.pages)} documentation pages"
|
|
1142
|
+
)
|
|
1143
|
+
|
|
1144
|
+
if mkdocs_serve:
|
|
1145
|
+
console.print(
|
|
1146
|
+
"[blue]🌐[/blue] MkDocs development server started at http://127.0.0.1:8000"
|
|
1147
|
+
)
|
|
1148
|
+
console.print("[yellow]Press Ctrl+C to stop the server[/yellow]")
|
|
1149
|
+
else:
|
|
1150
|
+
console.print("[red]❌[/red] Failed to generate MkDocs site")
|
|
1151
|
+
|
|
1152
|
+
|
|
1153
|
+
def _handle_contextual_ai(
|
|
1154
|
+
contextual_ai: bool,
|
|
1155
|
+
ai_recommendations: int,
|
|
1156
|
+
ai_help_query: str | None,
|
|
1157
|
+
console: Console,
|
|
1158
|
+
) -> bool:
|
|
1159
|
+
"""Handle contextual AI assistant features.
|
|
1160
|
+
|
|
1161
|
+
Returns True if execution should continue, False if should return early.
|
|
1162
|
+
"""
|
|
1163
|
+
if not contextual_ai and not ai_help_query:
|
|
1164
|
+
return True
|
|
1165
|
+
|
|
1166
|
+
from crackerjack.services.contextual_ai_assistant import ContextualAIAssistant
|
|
1167
|
+
|
|
1168
|
+
console.print("[cyan]🤖[/cyan] Running contextual AI assistant analysis...")
|
|
1169
|
+
|
|
1170
|
+
try:
|
|
1171
|
+
from pathlib import Path
|
|
1172
|
+
|
|
1173
|
+
# Create filesystem interface that implements FileSystemInterface protocol
|
|
1174
|
+
class FileSystemImpl:
|
|
1175
|
+
def read_file(self, path: str | t.Any) -> str:
|
|
1176
|
+
return Path(path).read_text()
|
|
1177
|
+
|
|
1178
|
+
def write_file(self, path: str | t.Any, content: str) -> None:
|
|
1179
|
+
Path(path).write_text(content)
|
|
1180
|
+
|
|
1181
|
+
def exists(self, path: str | t.Any) -> bool:
|
|
1182
|
+
return Path(path).exists()
|
|
1183
|
+
|
|
1184
|
+
def mkdir(self, path: str | t.Any, parents: bool = False) -> None:
|
|
1185
|
+
Path(path).mkdir(parents=parents, exist_ok=True)
|
|
1186
|
+
|
|
1187
|
+
filesystem = FileSystemImpl()
|
|
1188
|
+
assistant = ContextualAIAssistant(filesystem, console)
|
|
1189
|
+
|
|
1190
|
+
# Handle help query
|
|
1191
|
+
if ai_help_query:
|
|
1192
|
+
help_response = assistant.get_quick_help(ai_help_query)
|
|
1193
|
+
console.print(f"\n[blue]🔍[/blue] AI Help for '{ai_help_query}':")
|
|
1194
|
+
console.print(help_response)
|
|
1195
|
+
return False # Exit after help query
|
|
1196
|
+
|
|
1197
|
+
# Get contextual recommendations
|
|
1198
|
+
console.print(
|
|
1199
|
+
"[blue]🧠[/blue] Analyzing project context for AI recommendations..."
|
|
1200
|
+
)
|
|
1201
|
+
recommendations = assistant.get_contextual_recommendations(ai_recommendations)
|
|
1202
|
+
|
|
1203
|
+
if recommendations:
|
|
1204
|
+
assistant.display_recommendations(recommendations)
|
|
1205
|
+
else:
|
|
1206
|
+
console.print("[green]✨[/green] Great job! No immediate recommendations.")
|
|
1207
|
+
|
|
1208
|
+
return False # Exit after AI recommendations
|
|
1209
|
+
|
|
1210
|
+
except Exception as e:
|
|
1211
|
+
console.print(f"[red]❌[/red] Contextual AI error: {e}")
|
|
1212
|
+
return False
|
|
1213
|
+
|
|
1214
|
+
|
|
 @app.command()
 def main(
     commit: bool = CLI_OPTIONS["commit"],
@@ -130,8 +1223,8 @@ def main(
     publish: BumpOption | None = CLI_OPTIONS["publish"],
     all: BumpOption | None = CLI_OPTIONS["all"],
     bump: BumpOption | None = CLI_OPTIONS["bump"],
-
-
+    strip_code: bool = CLI_OPTIONS["strip_code"],
+    run_tests: bool = CLI_OPTIONS["run_tests"],
     benchmark: bool = CLI_OPTIONS["benchmark"],
     test_workers: int = CLI_OPTIONS["test_workers"],
     test_timeout: int = CLI_OPTIONS["test_timeout"],
@@ -139,7 +1232,7 @@ def main(
     fast: bool = CLI_OPTIONS["fast"],
     comp: bool = CLI_OPTIONS["comp"],
     create_pr: bool = CLI_OPTIONS["create_pr"],
-
+    ai_fix: bool = CLI_OPTIONS["ai_fix"],
     start_mcp_server: bool = CLI_OPTIONS["start_mcp_server"],
     stop_mcp_server: bool = CLI_OPTIONS["stop_mcp_server"],
     restart_mcp_server: bool = CLI_OPTIONS["restart_mcp_server"],
@@ -153,6 +1246,14 @@ def main(
     stop_websocket_server: bool = CLI_OPTIONS["stop_websocket_server"],
     restart_websocket_server: bool = CLI_OPTIONS["restart_websocket_server"],
     websocket_port: int | None = CLI_OPTIONS["websocket_port"],
+    start_zuban_lsp: bool = CLI_OPTIONS["start_zuban_lsp"],
+    stop_zuban_lsp: bool = CLI_OPTIONS["stop_zuban_lsp"],
+    restart_zuban_lsp: bool = CLI_OPTIONS["restart_zuban_lsp"],
+    no_zuban_lsp: bool = CLI_OPTIONS["no_zuban_lsp"],
+    zuban_lsp_port: int = CLI_OPTIONS["zuban_lsp_port"],
+    zuban_lsp_mode: str = CLI_OPTIONS["zuban_lsp_mode"],
+    zuban_lsp_timeout: int = CLI_OPTIONS["zuban_lsp_timeout"],
+    enable_lsp_hooks: bool = CLI_OPTIONS["enable_lsp_hooks"],
     watchdog: bool = CLI_OPTIONS["watchdog"],
     monitor: bool = CLI_OPTIONS["monitor"],
     enhanced_monitor: bool = CLI_OPTIONS["enhanced_monitor"],
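Note: every parameter in this signature defaults to an entry of the shared `CLI_OPTIONS` mapping rather than an inline `typer.Option(...)` call, which keeps flag names, defaults, and help text centralized in `crackerjack/cli/options.py` (+642 -55 in this release). A minimal sketch of the pattern, with purely illustrative flag spellings and defaults (the real values are defined in `options.py` and are not shown in this diff):

    import typing as t

    import typer

    # Illustrative registry only; keys mirror the parameter names in main().
    CLI_OPTIONS: dict[str, t.Any] = {
        "zuban_lsp_port": typer.Option(
            8677, "--zuban-lsp-port", help="Port for the Zuban LSP server."
        ),
        "enable_lsp_hooks": typer.Option(
            False, "--enable-lsp-hooks", help="Route type checks through the LSP."
        ),
    }

    def demo(zuban_lsp_port: int = CLI_OPTIONS["zuban_lsp_port"]) -> None: ...

One design consequence: because the defaults are `typer.Option` objects, the function is only meaningful when invoked through Typer, which matches how `main()` is registered via `@app.command()`.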
@@ -164,6 +1265,8 @@ def main(
     orchestration_ai_mode: str = CLI_OPTIONS["orchestration_ai_mode"],
     dev: bool = CLI_OPTIONS["dev"],
     dashboard: bool = CLI_OPTIONS["dashboard"],
+    unified_dashboard: bool = CLI_OPTIONS["unified_dashboard"],
+    unified_dashboard_port: int | None = CLI_OPTIONS["unified_dashboard_port"],
     max_iterations: int = CLI_OPTIONS["max_iterations"],
     coverage_status: bool = CLI_OPTIONS["coverage_status"],
     coverage_goal: float | None = CLI_OPTIONS["coverage_goal"],
@@ -175,7 +1278,47 @@ def main(
     global_lock_dir: str | None = CLI_OPTIONS["global_lock_dir"],
     quick: bool = CLI_OPTIONS["quick"],
     thorough: bool = CLI_OPTIONS["thorough"],
+    clear_cache: bool = CLI_OPTIONS["clear_cache"],
+    cache_stats: bool = CLI_OPTIONS["cache_stats"],
+    generate_docs: bool = CLI_OPTIONS["generate_docs"],
+    docs_format: str = CLI_OPTIONS["docs_format"],
+    validate_docs: bool = CLI_OPTIONS["validate_docs"],
+    generate_changelog: bool = CLI_OPTIONS["generate_changelog"],
+    changelog_version: str | None = CLI_OPTIONS["changelog_version"],
+    changelog_since: str | None = CLI_OPTIONS["changelog_since"],
+    changelog_dry_run: bool = CLI_OPTIONS["changelog_dry_run"],
+    auto_version: bool = CLI_OPTIONS["auto_version"],
+    version_since: str | None = CLI_OPTIONS["version_since"],
+    accept_version: bool = CLI_OPTIONS["accept_version"],
+    smart_commit: bool = CLI_OPTIONS["smart_commit"],
+    heatmap: bool = CLI_OPTIONS["heatmap"],
+    heatmap_type: str = CLI_OPTIONS["heatmap_type"],
+    heatmap_output: str | None = CLI_OPTIONS["heatmap_output"],
+    anomaly_detection: bool = CLI_OPTIONS["anomaly_detection"],
+    anomaly_sensitivity: float = CLI_OPTIONS["anomaly_sensitivity"],
+    anomaly_report: str | None = CLI_OPTIONS["anomaly_report"],
+    predictive_analytics: bool = CLI_OPTIONS["predictive_analytics"],
+    prediction_periods: int = CLI_OPTIONS["prediction_periods"],
+    analytics_dashboard: str | None = CLI_OPTIONS["analytics_dashboard"],
+    # Enterprise features
+    enterprise_optimizer: bool = CLI_OPTIONS["enterprise_optimizer"],
+    enterprise_profile: str | None = CLI_OPTIONS["enterprise_profile"],
+    enterprise_report: str | None = CLI_OPTIONS["enterprise_report"],
+    mkdocs_integration: bool = CLI_OPTIONS["mkdocs_integration"],
+    mkdocs_serve: bool = CLI_OPTIONS["mkdocs_serve"],
+    mkdocs_theme: str = CLI_OPTIONS["mkdocs_theme"],
+    mkdocs_output: str | None = CLI_OPTIONS["mkdocs_output"],
+    contextual_ai: bool = CLI_OPTIONS["contextual_ai"],
+    ai_recommendations: int = CLI_OPTIONS["ai_recommendations"],
+    ai_help_query: str | None = CLI_OPTIONS["ai_help_query"],
+    # Configuration management features
+    check_config_updates: bool = CLI_OPTIONS["check_config_updates"],
+    apply_config_updates: bool = CLI_OPTIONS["apply_config_updates"],
+    diff_config: str | None = CLI_OPTIONS["diff_config"],
+    config_interactive: bool = CLI_OPTIONS["config_interactive"],
+    refresh_cache: bool = CLI_OPTIONS["refresh_cache"],
 ) -> None:
+    """Main CLI entry point with complexity <= 15."""
     options = create_options(
         commit,
         interactive,
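Note: the new docstring advertises an explicit complexity budget ("complexity <= 15"), which the refactor meets by keeping `main()` a flat sequence of guarded delegations (see `_process_all_commands` further down) rather than branching per flag. The diff does not show which tool enforces the bound; as one common way to audit such a claim, and not necessarily what crackerjack itself uses, the `radon` package can report per-function cyclomatic complexity:

    # Assumes the third-party `radon` package; shown only as one way to
    # check a complexity budget, not as crackerjack's actual tooling.
    from radon.complexity import cc_visit

    source = """
    def main(flag: bool) -> int:
        if flag:
            return 1
        return 0
    """

    for block in cc_visit(source):
        print(block.name, block.complexity)  # prints: main 2

Each guard in `main()` then costs one branch, so complexity grows with the number of handler groups instead of with the number of CLI flags.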
@@ -184,10 +1327,7 @@ def main(
         verbose,
         debug,
         publish,
-        all,
         bump,
-        clean,
-        test,
         benchmark,
         test_workers,
         test_timeout,
@@ -195,11 +1335,18 @@ def main(
         fast,
         comp,
         create_pr,
-        ai_agent,
         async_mode,
         experimental_hooks,
         enable_pyrefly,
         enable_ty,
+        start_zuban_lsp,
+        stop_zuban_lsp,
+        restart_zuban_lsp,
+        no_zuban_lsp,
+        zuban_lsp_port,
+        zuban_lsp_mode,
+        zuban_lsp_timeout,
+        enable_lsp_hooks,
         no_git_tags,
         skip_version_check,
         orchestrated,
@@ -208,6 +1355,8 @@ def main(
         orchestration_ai_mode,
         dev,
         dashboard,
+        unified_dashboard,
+        unified_dashboard_port,
         max_iterations,
         coverage_status,
         coverage_goal,
@@ -219,43 +1368,210 @@ def main(
         global_lock_dir,
         quick,
         thorough,
+        clear_cache,
+        cache_stats,
+        generate_docs,
+        docs_format,
+        validate_docs,
+        generate_changelog,
+        changelog_version,
+        changelog_since,
+        changelog_dry_run,
+        auto_version,
+        version_since,
+        accept_version,
+        smart_commit,
+        heatmap,
+        heatmap_type,
+        heatmap_output,
+        anomaly_detection,
+        anomaly_sensitivity,
+        anomaly_report,
+        predictive_analytics,
+        prediction_periods,
+        analytics_dashboard,
+        # Enterprise features
+        enterprise_optimizer,
+        enterprise_profile,
+        enterprise_report,
+        mkdocs_integration,
+        mkdocs_serve,
+        mkdocs_theme,
+        mkdocs_output,
+        contextual_ai,
+        ai_recommendations,
+        ai_help_query,
+        check_config_updates,
+        apply_config_updates,
+        diff_config,
+        config_interactive,
+        refresh_cache,
+        # New semantic parameters use defaults
+        run_tests=run_tests,
     )
 
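Note: this call now threads roughly seventy positional arguments into `create_options`, so argument order must match that function's signature exactly; only the new semantic parameter is passed by keyword (`run_tests=run_tests`), per the `# New semantic parameters use defaults` comment. A hedged sketch of why keyword-only parameters harden a call site like this (`create_options`'s real signature is not shown in this diff):

    from dataclasses import dataclass

    @dataclass
    class Options:
        commit: bool
        run_tests: bool

    # Hypothetical shape: parameters after the bare `*` must be passed by
    # keyword, so a drifted positional ordering fails loudly at the call site.
    def create_options(commit: bool, *, run_tests: bool = False) -> Options:
        return Options(commit=commit, run_tests=run_tests)

    opts = create_options(True, run_tests=True)
    assert opts.run_tests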
-
-
-
-    # Update the options object to reflect the verbose setting
-    options.verbose = True
-
-    # If debug flag is set, enable verbose mode as well
-    if debug:
-        verbose = True
-        options.verbose = True
+    # Setup debug and verbose flags
+    ai_fix, verbose = _setup_debug_and_verbose_flags(ai_debug, debug, verbose, options)
+    setup_ai_agent_env(ai_fix, ai_debug or debug)
 
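Note: the removed inline verbose/debug plumbing is now a single helper call whose tuple return rebinds both `ai_fix` and `verbose`. The helper's body is defined elsewhere in `__main__.py` and is not part of this hunk; reading only the call site and the logic it replaced, a hedged reconstruction might look like:

    import typing as t

    # Hypothetical reconstruction from the call site above; the real helper
    # may differ, in particular in how ai_fix is derived.
    def _setup_debug_and_verbose_flags(
        ai_debug: bool, debug: bool, verbose: bool, options: t.Any
    ) -> tuple[bool, bool]:
        if debug or ai_debug:
            verbose = True  # debug implied verbose in the removed inline code
        options.verbose = verbose
        ai_fix = ai_debug  # assumption: AI debugging implies AI fixing
        return ai_fix, verbose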
-
-
-    if _handle_server_commands(
-        monitor,
-        enhanced_monitor,
-        dashboard,
-        watchdog,
-        start_websocket_server,
-        stop_websocket_server,
-        restart_websocket_server,
-        start_mcp_server,
-        stop_mcp_server,
-        restart_mcp_server,
-        websocket_port,
-        dev,
-    ):
+    # Process all commands - returns True if should continue to main workflow
+    if not _process_all_commands(locals(), console, options):
         return
 
+    # Execute main workflow (interactive or standard mode)
     if interactive:
         handle_interactive_mode(options)
     else:
         handle_standard_mode(options, async_mode, job_id, orchestrated)
 
 
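Note: `main()` hands `locals()` to `_process_all_commands`, snapshotting all ninety-odd bound parameters into one mapping in a single expression; the trade-off is that lookups such as `local_vars["clear_cache"]` are validated only at runtime and are invisible to type checkers and rename refactorings. A small demonstration of the mechanism:

    def main_like(clear_cache: bool = False, cache_stats: bool = False) -> dict[str, object]:
        # At the top of a function body, locals() is exactly the mapping of
        # bound parameters - which is what _process_all_commands receives.
        return dict(locals())

    assert main_like(clear_cache=True) == {"clear_cache": True, "cache_stats": False}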
+def _process_all_commands(local_vars: t.Any, console: t.Any, options: t.Any) -> bool:
+    """Process all command-line commands and return True if should continue to main workflow."""
+    # Handle cache management commands early (they exit after execution)
+    if _handle_cache_commands(
+        local_vars["clear_cache"], local_vars["cache_stats"], console
+    ):
+        return False
+
+    # Handle configuration management commands early (they exit after execution)
+    if (
+        local_vars["check_config_updates"]
+        or local_vars["apply_config_updates"]
+        or local_vars["diff_config"]
+        or local_vars["refresh_cache"]
+    ):
+        handle_config_updates(options)
+        return False
+
+    # Handle server commands (monitoring, websocket, MCP, zuban LSP)
+    if _handle_server_commands(
+        local_vars["monitor"],
+        local_vars["enhanced_monitor"],
+        local_vars["dashboard"],
+        local_vars["unified_dashboard"],
+        local_vars["unified_dashboard_port"],
+        local_vars["watchdog"],
+        local_vars["start_websocket_server"],
+        local_vars["stop_websocket_server"],
+        local_vars["restart_websocket_server"],
+        local_vars["start_mcp_server"],
+        local_vars["stop_mcp_server"],
+        local_vars["restart_mcp_server"],
+        local_vars["websocket_port"],
+        local_vars["start_zuban_lsp"],
+        local_vars["stop_zuban_lsp"],
+        local_vars["restart_zuban_lsp"],
+        local_vars["zuban_lsp_port"],
+        local_vars["zuban_lsp_mode"],
+        local_vars["dev"],
+    ):
+        return False
+
+    # Handle documentation and analysis commands
+    return _handle_analysis_commands(local_vars, console, options)
+
+
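Note: two return-polarity conventions coexist in `_process_all_commands`: the cache and server helpers return truthy when they consumed the command (`if _handle_...: return False`), while the documentation, changelog, version, analytics, and enterprise helpers below return falsy when they did (`if not _handle_...: return False`). Both collapse to "stop before the main workflow", but a reader has to track the polarity per helper. A possible normalization, sketched here as a cleanup idea rather than anything in this release:

    from collections.abc import Callable

    # Uniform convention: a handler returns True when it consumed the command.
    def dispatch(handlers: list[Callable[[], bool]]) -> bool:
        for handler in handlers:
            if handler():
                return False  # handled: skip the main workflow
        return True  # nothing matched: continue to the main workflow

    assert dispatch([lambda: False, lambda: False]) is True
    assert dispatch([lambda: True]) is False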
+def _handle_analysis_commands(
+    local_vars: t.Any, console: t.Any, options: t.Any
+) -> bool:
+    """Handle documentation and analysis commands."""
+    # Handle documentation commands
+    if not _handle_documentation_commands(
+        local_vars["generate_docs"], local_vars["validate_docs"], console, options
+    ):
+        return False
+
+    # Handle changelog commands
+    if not _handle_changelog_commands(
+        local_vars["generate_changelog"],
+        local_vars["changelog_dry_run"],
+        local_vars["changelog_version"],
+        local_vars["changelog_since"],
+        console,
+        options,
+    ):
+        return False
+
+    # Handle version analysis
+    if not _handle_version_analysis(
+        local_vars["auto_version"],
+        local_vars["version_since"],
+        local_vars["accept_version"],
+        console,
+        options,
+    ):
+        return False
+
+    # Handle specialized analytics
+    return _handle_specialized_analytics(local_vars, console)
+
+
+def _handle_specialized_analytics(local_vars: t.Any, console: t.Any) -> bool:
+    """Handle specialized analytics and enterprise features."""
+    # Handle heatmap generation
+    if not _handle_heatmap_generation(
+        local_vars["heatmap"],
+        local_vars["heatmap_type"],
+        local_vars["heatmap_output"],
+        console,
+    ):
+        return False
+
+    # Handle anomaly detection
+    if not _handle_anomaly_detection(
+        local_vars["anomaly_detection"],
+        local_vars["anomaly_sensitivity"],
+        local_vars["anomaly_report"],
+        console,
+    ):
+        return False
+
+    # Handle predictive analytics
+    if not _handle_predictive_analytics(
+        local_vars["predictive_analytics"],
+        local_vars["prediction_periods"],
+        local_vars["analytics_dashboard"],
+        console,
+    ):
+        return False
+
+    # Handle enterprise features
+    return _handle_enterprise_features(local_vars, console)
+
+
+def _handle_enterprise_features(local_vars: t.Any, console: t.Any) -> bool:
+    """Handle enterprise features."""
+    # Handle enterprise optimizer
+    if not _handle_enterprise_optimizer(
+        local_vars["enterprise_optimizer"],
+        local_vars["enterprise_profile"],
+        local_vars["enterprise_report"],
+        console,
+    ):
+        return False
+
+    # Handle MkDocs integration
+    if not _handle_mkdocs_integration(
+        local_vars["mkdocs_integration"],
+        local_vars["mkdocs_serve"],
+        local_vars["mkdocs_theme"],
+        local_vars["mkdocs_output"],
+        console,
+    ):
+        return False
+
+    # Handle contextual AI
+    if not _handle_contextual_ai(
+        local_vars["contextual_ai"],
+        local_vars["ai_recommendations"],
+        local_vars["ai_help_query"],
+        console,
+    ):
+        return False
+
+    return True
+
+
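Note: because the dispatch layer consumes a plain mapping rather than parsed CLI state, it can be exercised without invoking Typer at all. A hypothetical pytest sketch under that assumption (the handler is stubbed, so only the short-circuit wiring is asserted; this test is not part of the package):

    # Hypothetical test sketch; not part of the package's test suite.
    def test_cache_command_short_circuits(monkeypatch):
        import crackerjack.__main__ as m

        monkeypatch.setattr(
            m, "_handle_cache_commands", lambda clear, stats, console: True
        )
        flags = {"clear_cache": True, "cache_stats": False}
        assert m._process_all_commands(flags, console=None, options=None) is False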
 def cli() -> None:
     app()
 